bitkeeper revision 1.1236.32.3 (42360bc220YuUEOiskz-8He4ZH_lCw)
author mafetter@fleming.research <mafetter@fleming.research>
Mon, 14 Mar 2005 22:10:10 +0000 (22:10 +0000)
committer mafetter@fleming.research <mafetter@fleming.research>
Mon, 14 Mar 2005 22:10:10 +0000 (22:10 +0000)
Temporary hack for linux 2.6.10 to use shadow mode instead of
writable page tables.

Signed-off-by: Michael Fetterman <michael.fetterman@cl.cam.ac.uk>
linux-2.6.10-xen-sparse/arch/xen/i386/mm/hypervisor.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/init.c
linux-2.6.10-xen-sparse/arch/xen/i386/mm/pgtable.c

index 9068960f051e541524e0773eed185cf8099428f9..a6cbb32231051d668062499af9e2fa5b6abf1b51 100644 (file)
@@ -125,28 +125,14 @@ static inline void increment_index_and_flush(void)
 
 void queue_l1_entry_update(pte_t *ptr, unsigned long val)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = val;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    _flush_page_update_queue();
+    *(unsigned long *)ptr = val;
 }
 
 void queue_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = val;
-    increment_index();
-    spin_unlock_irqrestore(&update_lock, flags);
+    _flush_page_update_queue();
+    *(unsigned long *)ptr = val;
 }
 
 void queue_pt_switch(unsigned long ptr)
@@ -275,28 +261,12 @@ void queue_machphys_update(unsigned long mfn, unsigned long pfn)
 /* queue and flush versions of the above */
 void xen_l1_entry_update(pte_t *ptr, unsigned long val)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = val;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    *(unsigned long *)ptr = val;
 }
 
 void xen_l2_entry_update(pmd_t *ptr, unsigned long val)
 {
-    int cpu = smp_processor_id();
-    int idx;
-    unsigned long flags;
-    spin_lock_irqsave(&update_lock, flags);
-    idx = per_cpu(mmu_update_queue_idx, cpu);
-    per_cpu(update_queue[idx], cpu).ptr = virt_to_machine(ptr);
-    per_cpu(update_queue[idx], cpu).val = val;
-    increment_index_and_flush();
-    spin_unlock_irqrestore(&update_lock, flags);
+    *(unsigned long *)ptr = val;
 }
 
 void xen_pt_switch(unsigned long ptr)
index 70287cf4532f3c9f41e41f05ee6c1864a1a1c99d..b104145e11eb70ffc88e31b68ad1c89ed58fe444 100644 (file)
@@ -77,7 +77,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 {
        if (pmd_none(*pmd)) {
                pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
-               make_page_readonly(page_table);
+               //make_page_readonly(page_table);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                if (page_table != pte_offset_kernel(pmd, 0))
                        BUG();  
@@ -349,7 +349,7 @@ static void __init pagetable_init (void)
         * it. We clean up by write-enabling and then freeing the old page dir.
         */
        memcpy(new_pgd, old_pgd, PTRS_PER_PGD_NO_HV*sizeof(pgd_t));
-       make_page_readonly(new_pgd);
+       //make_page_readonly(new_pgd);
        queue_pgd_pin(__pa(new_pgd));
        load_cr3(new_pgd);
        queue_pgd_unpin(__pa(old_pgd));
index 0d8f833bd954b8743bd984dba1c8cea35e7b6785..e83f45c05de9436a4e868222758485205c816a62 100644 (file)
@@ -181,7 +181,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
        pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte) {
                clear_page(pte);
-               make_page_readonly(pte);
+               //make_page_readonly(pte);
                xen_flush_page_update_queue();
        }
        return pte;
@@ -194,7 +194,7 @@ void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
        set_page_count(page, 1);
 
        clear_page(pte);
-       make_page_readonly(pte);
+       //make_page_readonly(pte);
        queue_pte_pin(__pa(pte));
        flush_page_update_queue();
 }
@@ -304,7 +304,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
        spin_unlock_irqrestore(&pgd_lock, flags);
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
  out:
-       make_page_readonly(pgd);
+       //make_page_readonly(pgd);
        queue_pgd_pin(__pa(pgd));
        flush_page_update_queue();
 }